% scribe: YiFeng Li
% lastupdate: Oct. 10, 2005
% lecture: 10
% title: Kolmogorov's Law of Large Numbers
% references: Durrett, section 1.8
% keywords: Kolmogorov's Law of Large Numbers, law of the iterated logarithm, convergence in distribution, weak convergence
% end
\documentclass[12pt,letterpaper]{article}
\include{macros}
\newcommand{\convpoint}{\stackrel{p.w.}{\longrightarrow}}
\newcommand{\conv}{\rightarrow}
\newtheorem{fact}[theorem]{Fact}
\begin{document}
\lecture{10}{Kolmogorov's Law of Large Numbers}{YiFeng Li}{yfli@berkeley.edu}

(These notes are a revision of the work of Vinod Prabhakaran, 2002.)

\section{Law of the Iterated Logarithm}
% keywords: law of the iterated logarithm
% end

Let $X_{1}$, $X_{2}$, \dots\ be i.i.d.\ with $\E X_{i}=0$ and $\E X_{i}^{2}=\sigma ^{2}$, and let $S_{n}=X_{1}+\dots +X_{n}$. We know that
\[
\frac{S_{n}}{n^{\frac{1}{2}+\varepsilon }}\overset{a.s.}{\longrightarrow }0\text{ \ as }n\rightarrow \infty .
\]
We will show later that
\[
\frac{S_{n}}{\sigma n^{\frac{1}{2}}}\overset{d}{\longrightarrow }N(0,1)\text{ \ as }n\rightarrow \infty .
\]
For general interest, we state, without proof, the \emph{Law of the Iterated Logarithm}:
\[
\limsup_{n\rightarrow \infty }\frac{S_{n}}{\sigma \sqrt{2n\log (\log n)}}=1\text{ a.s.},
\]
\[
\liminf_{n\rightarrow \infty }\frac{S_{n}}{\sigma \sqrt{2n\log (\log n)}}=-1\text{ a.s.}
\]
Equivalently, the $\limsup$ statement says that for every $\varepsilon >0$,
\[
\P(S_{n}>(1+\varepsilon )\sigma \sqrt{2n\log (\log n)}\text{ i.o.})=0 ,
\]
\[
\P(S_{n}>(1-\varepsilon )\sigma \sqrt{2n\log (\log n)}\text{ i.o.})=1 .
\]

\section{Kolmogorov's Law of Large Numbers}
% keywords: Kolmogorov's Law of Large Numbers
% end

\begin{theorem}
Let $X_{1}$, $X_{2}$, \dots\ be i.i.d.\ with $\E\!\left( \left\vert X_{i}\right\vert \right) <\infty $, and let $S_{n}=X_{1}+\dots +X_{n}$. Then $S_{n}/n\rightarrow \E\!\left( X\right) $ a.s.\ as $n\rightarrow \infty $.
\end{theorem}

Note that the theorem is true with just pairwise independence instead of the full independence assumed here [\cite{durrett}, p.55 (7.1)]. The theorem also has an important generalization to stationary sequences (the \emph{ergodic theorem}, [\cite{durrett}, p.337 (2.1)]).

\begin{proof}
\textit{Step 1}: Replace $X_{i}$ by $\widetilde{X}_{i}=X_{i}-\E X$ (note $\E X_{i}=\E X$). Then
\begin{equation*}
\frac{\widetilde{S}_{n}}{n}=\frac{S_{n}}{n}-\E X .
\end{equation*}
So it is enough to consider $\E X=0$.

\textit{Step 2}: Now assume $\E X=0$. Introduce the truncated variables
\begin{equation*}
\widehat{X}_{n}:=X_{n}I\left( \left\vert X_{n}\right\vert \leq n\right) .
\end{equation*}
Observe that
\begin{equation*}
\P(X_{n}=\widehat{X}_{n}\text{ ev.})=1 .
\end{equation*}
(To see this, note that
\begin{equation*}
\P(X_{n}\neq \widehat{X}_{n}\text{ i.o.})=\P\!\left( \left\vert X_{n}\right\vert >n\text{ i.o.}\right)
\end{equation*}
and
\begin{equation*}
\sum\limits_{n=1}^{\infty }\P\!\left( \left\vert X_{n}\right\vert >n\right) =\sum\limits_{n=1}^{\infty }\P\!\left( \left\vert X\right\vert >n\right) =\E\!\left( \sum\limits_{n=1}^{\infty }I\left( \left\vert X\right\vert >n\right) \right) <\infty ,
\end{equation*}
since
\begin{equation*}
\sum\limits_{n=1}^{\infty }I\left( \left\vert X\right\vert >n\right) =\sum\limits_{1\leq n<\left\vert X\right\vert }1\leq \left\vert X\right\vert
\end{equation*}
and $\E\left\vert X\right\vert <\infty $. The first Borel--Cantelli lemma then gives $\P(X_{n}\neq \widehat{X}_{n}\text{ i.o.})=0$.)
Since $X_{n}=\widehat{X}_{n}$ eventually with probability one, the difference $S_{n}-\widehat{S}_{n}$ is eventually constant a.s., and it is therefore enough to show $\widehat{S}_{n}/n\rightarrow 0$ a.s., where $\widehat{S}_{n}:=\widehat{X}_{1}+\dots +\widehat{X}_{n}$. The remaining steps (a variance estimate for the truncated variables followed by a subsequence argument) can be found in the proof of (7.1) in [\cite{durrett}].
\end{proof}
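Two standard examples may help to illustrate the theorem and the role of the integrability hypothesis $\E\!\left( \left\vert X_{i}\right\vert \right) <\infty $; neither is needed in what follows.

If the $X_{i}$ are i.i.d.\ with $\P(X_{i}=1)=p=1-\P(X_{i}=0)$, then $\E\left\vert X_{i}\right\vert =p<\infty $ and the theorem gives
\begin{equation*}
\frac{S_{n}}{n}\overset{a.s.}{\longrightarrow }p ,
\end{equation*}
the strong law behind the frequency interpretation of probability.

On the other hand, the integrability hypothesis cannot be dropped. If the $X_{i}$ are i.i.d.\ standard Cauchy, with density $1/\left( \pi (1+x^{2})\right) $, then $\E\left\vert X_{i}\right\vert =\infty $ and $S_{n}/n$ is itself standard Cauchy for every $n$; in fact $\limsup_{n\rightarrow \infty }\left\vert S_{n}\right\vert /n=\infty $ a.s., so $S_{n}/n$ does not converge.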
\section{Convergence in Distribution}
% keywords: convergence in distribution, weak convergence
% end

We say that $X_{n}$ \emph{converges in distribution} (or \emph{converges weakly}) to $X$, written $X_{n}\overset{d}{\rightarrow }X$, if
\begin{equation*}
F_{n}(x):=\P(X_{n}\leq x)\rightarrow F(x):=\P(X\leq x)
\end{equation*}
at every point $x$ where $F$ is continuous.

\begin{theorem}[Skorokhod representation]
If $X_{n}\overset{d}{\rightarrow }X$, then there exist random variables $Y_{n}$, $Y$ on a common probability space with $Y_{n}\overset{d}{=}X_{n}$ for each $n$, $Y\overset{d}{=}X$, and $Y_{n}\rightarrow Y$ a.s.\ (one construction: take $U$ uniform on $(0,1)$ and set $Y_{n}:=\sup \left\{ y:F_{n}(y)<U\right\} $, $Y:=\sup \left\{ y:F(y)<U\right\} $; see [\cite{durrett}]).
\end{theorem}

The following proposition is an application of the above.

\begin{proposition}
If $X_{n}\overset{d}{\rightarrow }X$, then for every bounded continuous function $f:R\rightarrow R$,
\begin{equation*}
\E\!\left[ f\!\left( X_{n}\right) \right] \rightarrow \E\!\left[ f\!\left( X\right) \right] .
\end{equation*}
\end{proposition}

\begin{proof}
Without loss of generality (replacing the $X_{n}$ by the $Y_{n}$ of the Skorokhod representation), we may assume $X_{n}\overset{a.s.}{\rightarrow }X$. Then $f\!\left( X_{n}\right) \rightarrow f\!\left( X\right) $ a.s., and $f$ is bounded, so we can take expectations and apply the bounded convergence theorem.
\end{proof}

%\begin{thebibliography}{9}
%\bibitem{durrett} R. Durrett. \textit{Probability: Theory and Examples}. Duxbury Press, Belmont, CA, Third edition, 2005.
%\end{thebibliography}

\bibliographystyle{plain}
\bibliography{../books}

\end{document}